#endif
/* Dynamically-mapped IRQ. */
-static int time_irq;
+static int TIMER_IRQ;
static struct irqaction irq_timer = {
timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer",
BUG();
printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
- time_irq = bind_virq_to_irq(VIRQ_TIMER);
+ TIMER_IRQ = bind_virq_to_irq(VIRQ_TIMER);
- (void)setup_irq(time_irq, &irq_timer);
+ (void)setup_irq(TIMER_IRQ, &irq_timer);
}
/* Convert jiffies to system time. Call with xtime_lock held for reading. */
* Change "struct page" to physical address.
*/
#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-#define page_to_phys(page) (phys_to_machine(page_to_pseudophys(page)))
+#define page_to_phys(page) (phys_to_machine(page_to_pseudophys(page)))
-#define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
-#define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
+#define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + \
+ (unsigned long) bio_offset((bio)))
+#define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + \
+ (unsigned long) (bv)->bv_offset)
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
(((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
- ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == bvec_to_pseudophys((vec2))))
+ ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
+ bvec_to_pseudophys((vec2))))
extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
#define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-#define isa_bus_to_virt(_x) (void *)__fix_to_virt(FIX_ISAMAP_BEGIN - ((_x) >> PAGE_SHIFT))
+#define isa_bus_to_virt(_x) (void *)__fix_to_virt(FIX_ISAMAP_BEGIN - \
+ ((_x) >> PAGE_SHIFT))
#else
#define isa_bus_to_virt(_x) isa_bus_to_virt_needs_PRIVILEGED_BUILD
#endif
static inline unsigned type in##bwl(int port) { \
return in##bwl##_quad(port, 0); \
}
-#else
+#else
#define __BUILDIO(bwl,bw,type) \
static inline void out##bwl(unsigned type value, int port) { \
out##bwl##_local(value, port); \
#define FIRST_DEVICE_VECTOR 0x31
#define FIRST_SYSTEM_VECTOR 0xef
-/* #define TIMER_IRQ _EVENT_TIMER */
+#define TIMER_IRQ timer_irq
/*
* 16 8259A IRQ's, 208 potential APIC interrupt sources.
* should be changed accordingly.
*/
#define NR_VECTORS 256
-
-#ifdef CONFIG_PCI_USE_VECTOR
-#define NR_IRQS FIRST_SYSTEM_VECTOR
-#define NR_IRQ_VECTORS NR_IRQS
-#else
-#ifdef CONFIG_X86_IO_APIC
-#define NR_IRQS 224
-# if (224 >= 32 * NR_CPUS)
-# define NR_IRQ_VECTORS NR_IRQS
-# else
-# define NR_IRQ_VECTORS (32 * NR_CPUS)
-# endif
-#else
-#define NR_IRQS 16
-#define NR_IRQ_VECTORS NR_IRQS
-#endif
-#endif
#endif
#define FPU_IRQ 13
* are bound using the provided bind/unbind functions.
*/
-#define PIRQ_BASE 0
-#define NR_PIRQS 128
+#define PIRQ_BASE 0
+#define NR_PIRQS 128
-#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
-#define NR_DYNIRQS 128
+#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
+#define NR_DYNIRQS 128
-#define NR_IRQS (NR_PIRQS + NR_DYNIRQS)
-#define NR_IRQ_VECTORS NR_IRQS
+#define NR_IRQS (NR_PIRQS + NR_DYNIRQS)
+#define NR_IRQ_VECTORS NR_IRQS
-#define pirq_to_irq(_x) ((_x) + PIRQ_BASE)
-#define irq_to_pirq(_x) ((_x) - PIRQ_BASE)
+#define pirq_to_irq(_x) ((_x) + PIRQ_BASE)
+#define irq_to_pirq(_x) ((_x) - PIRQ_BASE)
-#define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE)
-#define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE)
+#define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE)
+#define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE)
#ifndef __ASSEMBLY__
/* Dynamic binding of event channels and VIRQ sources to Linux IRQ space. */
#define deactivate_mm(tsk, mm) \
asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
-#define activate_mm(prev, next) \
-do { \
- switch_mm((prev),(next),NULL); \
- flush_page_update_queue(); \
-} while ( 0 )
+#define activate_mm(prev, next) do { \
+ switch_mm((prev),(next),NULL); \
+ flush_page_update_queue(); \
+} while (0)
#endif
#define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)])
/*
 * Convert a pseudo-physical address to a machine address.
 * The page frame is translated via pfn_to_mfn(); the intra-page
 * offset bits are preserved unchanged.
 */
static inline unsigned long phys_to_machine(unsigned long phys)
{
	unsigned long machine = pfn_to_mfn(phys >> PAGE_SHIFT);
	machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
	return machine;
}
/*
 * Convert a machine address back to a pseudo-physical address.
 * Inverse of phys_to_machine(): translates the frame via mfn_to_pfn()
 * and preserves the intra-page offset bits.
 */
static inline unsigned long machine_to_phys(unsigned long machine)
{
	unsigned long phys = mfn_to_pfn(machine >> PAGE_SHIFT);
	phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
	return phys;
}
/*
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
#define boot_pte_t pte_t /* or would you rather have a typedef */
-#if 0 /* XXXcl for MMU_UPDATE_DEBUG */
-static inline unsigned long pte_val(pte_t x)
-{
- unsigned long ret = x.pte_low;
- if ( (ret & 1) ) ret = machine_to_phys(ret);
- return ret;
-}
-#else
-#define pte_val(x) (((x).pte_low & 1) ? machine_to_phys((x).pte_low) : (x).pte_low)
-#endif
+#define pte_val(x) (((x).pte_low & 1) ? machine_to_phys((x).pte_low) : \
+ (x).pte_low)
#define pte_val_ma(x) ((x).pte_low)
#define HPAGE_SHIFT 22
#endif
/*
 * Read the pseudo-physical value of a pmd entry.  A non-zero entry
 * holds a machine address and is translated back; a zero (none)
 * entry is returned as-is.
 */
static inline unsigned long pmd_val(pmd_t x)
{
	unsigned long ret = x.pmd;
	if (ret)
		ret = machine_to_phys(ret);
	return ret;
}
#define pgd_val(x) ({ BUG(); (unsigned long)0; })
#define pgprot_val(x) ((x).pgprot)
/*
 * Build a pte from a pseudo-physical value.  If the present bit
 * (bit 0) is set, the address part is translated to a machine
 * address before being stored.
 */
static inline pte_t __pte(unsigned long x)
{
	if (x & 1)
		x = phys_to_machine(x);
	return ((pte_t) { (x) });
}
-#define __pte_ma(x) ((pte_t) { (x) } )
+#define __pte_ma(x) ((pte_t) { (x) } )
/*
 * Build a pmd from a pseudo-physical value; translated to a machine
 * address when the present bit (bit 0) is set.  (Redundant double
 * parentheses around the condition dropped to match __pte above.)
 */
static inline pmd_t __pmd(unsigned long x)
{
	if (x & 1)
		x = phys_to_machine(x);
	return ((pmd_t) { (x) });
}
-#define __pgd(x) ({ BUG(); (pgprot_t) { 0 }; })
+#define __pgd(x) ({ BUG(); (pgprot_t) { 0 }; })
#define __pgprot(x) ((pgprot_t) { (x) } )
#endif /* !__ASSEMBLY__ */
#endif /* __ASSEMBLY__ */
-/*
- * XXXcl two options for PAGE_OFFSET
- * - 0xC0000000:
- * change text offset in arch/xen/i386/kernel/vmlinux.lds.S
- * change __pa/__va macros
- * - 0xC0100000:
- * change TASK_SIZE
- */
#ifdef __ASSEMBLY__
#define __PAGE_OFFSET (0xC0000000)
#else
#define _ASMi386_PARAM_H
#ifdef __KERNEL__
-# define HZ 100/* 0 */ /* Internal kernel timer frequency */
+# define HZ 100 /* Internal kernel timer frequency */
# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
# define CLOCKS_PER_SEC (USER_HZ) /* like times() */
#endif
((unsigned long long)page_to_pfn(pte) <<
(unsigned long long) PAGE_SHIFT)));
flush_page_update_queue();
- /* XXXcl queue */
}
/*
* Allocate and free page tables.
extern void pte_free(struct page *pte);
-#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* hook is made available.
*/
#define set_pte_batched(pteptr, pteval) \
-queue_l1_entry_update(pteptr, (pteval).pte_low)
+ queue_l1_entry_update(pteptr, (pteval).pte_low)
#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
/*
}
#define pte_same(a, b) ((a).pte_low == (b).pte_low)
-/*
+/*
* We detect special mappings in one of two ways:
* 1. If the MFN is an I/O page then Xen will set the m2p entry
* to be outside our maximum possible pseudophys range.
*/
#define INVALID_P2M_ENTRY (~0UL)
#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
/*
 * Translate a pte to its pseudo-physical frame number.  An MFN that
 * does not round-trip through the P2M table (e.g. an I/O page or a
 * foreign frame) yields max_mapnr so that pfn_valid() fails for it.
 */
#define pte_pfn(_pte)						\
({								\
	unsigned long mfn = (_pte).pte_low >> PAGE_SHIFT;	\
	unsigned long pfn = mfn_to_pfn(mfn);			\
	if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn))	\
		pfn = max_mapnr; /* special: force !pfn_valid() */ \
	pfn;							\
})
#define pte_page(_pte) pfn_to_page(pte_pfn(_pte))
#define pte_none(x) (!(x).pte_low)
-
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pte_ma(pfn, prot) __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
#endif
-extern void * high_memory;
+extern void *high_memory;
extern unsigned long vmalloc_earlyreserve;
/*
can temporarily clear it. */
#define pmd_present(x) (pmd_val(x))
/* pmd_clear below */
-#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
+#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
#define update_mmu_cache(vma,address,pte) do { } while (0)
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Update a pte's access flags.  When the mapping belongs to the
 * current mm we can use the combined HYPERVISOR_update_va_mapping()
 * hypercall, which also invalidates the local TLB entry; otherwise
 * fall back to a queued L1 entry update plus an explicit TLB flush.
 *
 * The body must use the __-prefixed macro parameters: referencing
 * bare vma/address/entry captures the caller's locals and only works
 * by accident when the caller uses those exact identifier names.
 */
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
	do {							\
		if (__dirty) {					\
			if (likely((__vma)->vm_mm == current->mm)) { \
				xen_flush_page_update_queue();	\
				HYPERVISOR_update_va_mapping(	\
					(__address)>>PAGE_SHIFT, \
					(__entry), UVMF_INVLPG); \
			} else {				\
				xen_l1_entry_update((__ptep),	\
					(__entry).pte_low);	\
				flush_tlb_page(__vma, __address); \
			}					\
		}						\
	} while (0)
#define __HAVE_ARCH_PTEP_ESTABLISH
#define ptep_establish(__vma, __address, __ptep, __entry) \
#define __HAVE_ARCH_PTEP_ESTABLISH_NEW
#define ptep_establish_new(__vma, __address, __ptep, __entry) \
do { \
- if ( likely((__vma)->vm_mm == current->mm) ) { \
+ if (likely((__vma)->vm_mm == current->mm)) { \
xen_flush_page_update_queue(); \
HYPERVISOR_update_va_mapping((__address)>>PAGE_SHIFT, \
__entry, 0); \
#define kern_addr_valid(addr) (1)
#endif /* !CONFIG_DISCONTIGMEM */
-#define io_remap_page_range(vma,from,phys,size,prot) \
- direct_remap_area_pages(vma->vm_mm,from,phys,size,prot,DOMID_IO)
+#define io_remap_page_range(vma,from,phys,size,prot) \
+ direct_remap_area_pages(vma->vm_mm,from,phys,size,prot,DOMID_IO)
int direct_remap_area_pages(struct mm_struct *mm,
- unsigned long address,
- unsigned long machine_addr,
- unsigned long size,
- pgprot_t prot,
- domid_t domid);
+ unsigned long address,
+ unsigned long machine_addr,
+ unsigned long size,
+ pgprot_t prot,
+ domid_t domid);
int __direct_remap_area_pages(struct mm_struct *mm,
unsigned long address,
unsigned long size,
static inline void wbinvd(void)
{
- mmu_update_t u;
- u.ptr = MMU_EXTENDED_COMMAND;
- u.val = MMUEXT_FLUSH_CACHE;
- (void)HYPERVISOR_mmu_update(&u, 1, NULL);
+ mmu_update_t u;
+ u.ptr = MMU_EXTENDED_COMMAND;
+ u.val = MMUEXT_FLUSH_CACHE;
+ (void)HYPERVISOR_mmu_update(&u, 1, NULL);
}
static inline unsigned long get_limit(unsigned long segment)
* includes these barriers, for example.
*/
/*
 * Paravirtual IRQ-flag primitives.  "Interrupts" are Xen event
 * channels, masked by setting evtchn_upcall_mask in the shared info
 * page.  After every unmask we must re-check evtchn_upcall_pending
 * and force a callback, otherwise an event delivered while masked
 * would be lost (hence the barrier();-separated unmask-then-check
 * sequence below).
 */
#define __cli()								\
do {									\
	HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1;	\
	barrier();							\
} while (0)

#define __sti()								\
do {									\
	shared_info_t *_shared = HYPERVISOR_shared_info;		\
	barrier();							\
	_shared->vcpu_data[0].evtchn_upcall_mask = 0;			\
	barrier(); /* unmask then check (avoid races) */		\
	if (unlikely(_shared->vcpu_data[0].evtchn_upcall_pending))	\
		force_evtchn_callback();				\
} while (0)

#define __save_flags(x)							\
do {									\
	(x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;	\
} while (0)

#define __restore_flags(x)						\
do {									\
	shared_info_t *_shared = HYPERVISOR_shared_info;		\
	barrier();							\
	if ((_shared->vcpu_data[0].evtchn_upcall_mask = (x)) == 0) {	\
		barrier(); /* unmask then check (avoid races) */	\
		if (unlikely(_shared->vcpu_data[0].evtchn_upcall_pending)) \
			force_evtchn_callback();			\
	}								\
} while (0)

/* halt would yield the vcpu; in this tree it is a no-op. */
#define safe_halt()		((void)0)

#define __save_and_cli(x)						\
do {									\
	(x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;	\
	HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1;	\
	barrier();							\
} while (0)

#define __save_and_sti(x)						\
do {									\
	shared_info_t *_shared = HYPERVISOR_shared_info;		\
	barrier();							\
	(x) = _shared->vcpu_data[0].evtchn_upcall_mask;			\
	_shared->vcpu_data[0].evtchn_upcall_mask = 0;			\
	barrier(); /* unmask then check (avoid races) */		\
	if (unlikely(_shared->vcpu_data[0].evtchn_upcall_pending))	\
		force_evtchn_callback();				\
} while (0)
#define local_irq_save(x) __save_and_cli(x)
-#define local_irq_restore(x) __restore_flags(x)
-#define local_save_flags(x) __save_flags(x)
-#define local_irq_disable() __cli()
-#define local_irq_enable() __sti()
+#define local_irq_restore(x) __restore_flags(x)
+#define local_save_flags(x) __save_flags(x)
+#define local_irq_disable() __cli()
+#define local_irq_enable() __sti()
-#define irqs_disabled() \
- HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask
+#define irqs_disabled() HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask
/*
* disable hlt during certain critical i/o operations
#include <linux/mm.h>
#include <asm/processor.h>
-#define __flush_tlb() do { \
- xen_tlb_flush(); \
-} while (/*CONSTCOND*/0)
+#define __flush_tlb() xen_tlb_flush()
/*
* Global pages have to be flushed a bit differently. Not a real
* performance problem because this does not happen often.
*/
-#define __flush_tlb_global() \
- do { \
- xen_tlb_flush(); \
- } while (0)
+#define __flush_tlb_global() xen_tlb_flush()
extern unsigned long pgkern_mask;
#define cpu_has_invlpg (boot_cpu_data.x86 > 3)
-#define __flush_tlb_single(addr) do { \
- xen_invlpg(addr); \
-} while (/* CONSTCOND */0)
+#define __flush_tlb_single(addr) xen_invlpg(addr)
-# define __flush_tlb_one(addr) __flush_tlb_single(addr)
+#define __flush_tlb_one(addr) __flush_tlb_single(addr)
/*
* TLB flushing:
/**
* alloc_skb_from_cache - allocate a network buffer
* @cp: kmem_cache from which to allocate the data area
- * (object size must be big enough for @size bytes + skb overheads)
+ * (object size must be big enough for @size bytes + skb overheads)
* @size: size to allocate
* @gfp_mask: allocation mask
*